@InProceedings{VegaFeitQuirHapp:2016:SiSaFa,
               author = "Vega, Pedro Juan Soto and Feitosa, Raul Queiroz and Quirita, 
                         Victor Hugo Ayma and Happ, Patrick Nigri",
          affiliation = "{Pontifical Catholic University of Rio de Janeiro} and Pontifical 
                         Catholic University of Rio de Janeiro, Rio de Janeiro State 
                         University and {Pontifical Catholic University of Rio de Janeiro} 
                         and {Pontifical Catholic University of Rio de Janeiro}",
                title = "Single Sample Face Recognition from Video via Stacked Supervised 
                         Auto-encoder",
            booktitle = "Proceedings...",
                 year = "2016",
               editor = "Aliaga, Daniel G. and Davis, Larry S. and Farias, Ricardo C. and 
                         Fernandes, Leandro A. F. and Gibson, Stuart J. and Giraldi, Gilson 
                         A. and Gois, Jo{\~a}o Paulo and Maciel, Anderson and Menotti, 
                         David and Miranda, Paulo A. V. and Musse, Soraia and Namikawa, 
                         Laercio and Pamplona, Mauricio and Papa, Jo{\~a}o Paulo and 
                         Santos, Jefersson dos and Schwartz, William Robson and Thomaz, 
                         Carlos E.",
         organization = "Conference on Graphics, Patterns and Images, 29. (SIBGRAPI)",
            publisher = "IEEE Computer Society´s Conference Publishing Services",
              address = "Los Alamitos",
             keywords = "auto-encoder, face recognition, surveillance.",
             abstract = "This work proposes and evaluates strategies based on Stacked 
                         Supervised Auto-Encoders (SSAE) for face representation in video 
                         surveillance applications. The study focuses on the identification 
                         task with a single sample per person (SSPP) in the gallery. 
                         Variations in terms of pose, facial expression, illumination and 
                         occlusion are approached in two ways. First, the SSAE extracts 
                         features from face images, which are robust to such variations. 
                         Second, we propose methods to exploit the multiple samples per 
                          person probes (MSPPP) that can be extracted from video sequences. 
                          Three variants of the proposed method are compared on the HONDA/UCSD 
                         and VIDTIMIT video datasets. The experimental results demonstrate 
                         that strategies combining SSAE and MSPPP are able to outperform 
                          other SSPP methods, such as local binary patterns, in face 
                         recognition from video.",
  conference-location = "S{\~a}o Jos{\'e} dos Campos, SP, Brazil",
      conference-year = "4-7 Oct. 2016",
                  doi = "10.1109/SIBGRAPI.2016.022",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2016.022",
             language = "en",
                  ibi = "8JMKD3MGPAW/3M55GN2",
                  url = "http://urlib.net/ibi/8JMKD3MGPAW/3M55GN2",
           targetfile = "PID4367205.pdf",
        urlaccessdate = "2024, May 03"
}

